        curr->arch.hvm_vcpu.io_state = HVMIO_none;
        break;
    case X86EMUL_UNHANDLEABLE:
-        hvm_send_assist_req(curr);
-        rc = (p_data != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
+        rc = (!hvm_send_assist_req(curr) || (p_data != NULL))
+             ? X86EMUL_RETRY : X86EMUL_OKAY;
        break;
    default:
        BUG();
    }
}
-void hvm_send_assist_req(struct vcpu *v)
+bool_t hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
-        return; /* implicitly bins the i/o operation */
+        return 0; /* implicitly bins the i/o operation */

    p = &get_ioreq(v)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash(v->domain);
-        return;
+        return 0;
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
+
+    return 1;
}

void hvm_hlt(unsigned long rflags)
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
-void hvm_send_assist_req(struct vcpu *v);
+bool_t hvm_send_assist_req(struct vcpu *v);
void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
u64 hvm_get_guest_tsc(struct vcpu *v);
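
For clarity, a minimal caller-side sketch of the new contract (forward_to_dm() is a
hypothetical helper for illustration, not part of this patch): a 0 return from
hvm_send_assist_req() now means the ioreq was binned (shutdown deferral failed, or the
domain was crashed because of a bad ioreq state), so callers must not assume a device
model response is forthcoming.

/* Hypothetical illustration only -- not part of this patch. */
static int forward_to_dm(struct vcpu *curr, void *p_data)
{
    /* 0 means the request was binned; retry rather than waiting
     * for a completion that will never arrive. */
    if ( !hvm_send_assist_req(curr) )
        return X86EMUL_RETRY;

    /* If the caller supplied a data buffer (p_data != NULL), the
     * result must come back from the device model, so the emulation
     * is retried; otherwise the operation can complete immediately. */
    return (p_data != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
}

This mirrors the combined condition introduced in the emulate.c hunk above, just
unfolded into separate checks.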